[IA64] cache flush
author awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Mon, 3 Apr 2006 14:33:35 +0000 (08:33 -0600)
committer awilliam@xenbuild.aw <awilliam@xenbuild.aw>
Mon, 3 Apr 2006 14:33:35 +0000 (08:33 -0600)
domain_cache_flush added.
SAL_CACHE_FLUSH implemented.

Signed-off-by: Tristan Gingold <tristan.gingold@bull.net>
xen/arch/ia64/linux-xen/setup.c
xen/arch/ia64/xen/Makefile
xen/arch/ia64/xen/dom_fw.c
xen/arch/ia64/xen/domain.c
xen/arch/ia64/xen/flushd.S [new file with mode: 0644]
xen/include/asm-ia64/domain.h
xen/include/asm-ia64/vhpt.h
xen/include/asm-ia64/xenpage.h

index ce5dee42ddfbaf7e4274a46f3f8cbeb6e4b252c7..da3e9137939b32edb8221dcafb3fe1b4e277cbda 100644 (file)
@@ -105,6 +105,11 @@ extern void early_cmdline_parse(char **);
 #define        I_CACHE_STRIDE_SHIFT    5       /* Safest way to go: 32 bytes by 32 bytes */
 unsigned long ia64_i_cache_stride_shift = ~0;
 
+#ifdef XEN
+#define D_CACHE_STRIDE_SHIFT   5       /* Safest.  */
+unsigned long ia64_d_cache_stride_shift = ~0;
+#endif
+
 /*
  * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1).  This
  * mask specifies a mask of address bits that must be 0 in order for two buffers to be
@@ -718,6 +723,9 @@ get_max_cacheline_size (void)
                 max = SMP_CACHE_BYTES;
                /* Safest setup for "flush_icache_range()" */
                ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
+#ifdef XEN
+               ia64_d_cache_stride_shift = D_CACHE_STRIDE_SHIFT;
+#endif
                goto out;
         }
 
@@ -733,6 +741,10 @@ get_max_cacheline_size (void)
                        cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
                        cci.pcci_unified = 1;
                }
+#ifdef XEN
+               if (cci.pcci_stride < ia64_d_cache_stride_shift)
+                       ia64_d_cache_stride_shift = cci.pcci_stride;
+#endif
                line_size = 1 << cci.pcci_line_size;
                if (line_size > max)
                        max = line_size;
@@ -754,6 +766,11 @@ get_max_cacheline_size (void)
   out:
        if (max > ia64_max_cacheline_size)
                ia64_max_cacheline_size = max;
+#ifdef XEN
+       if (ia64_d_cache_stride_shift > ia64_i_cache_stride_shift)
+               ia64_d_cache_stride_shift = ia64_i_cache_stride_shift;
+#endif
+
 }
 
 /*
index 1d3d80b3474a7d030533057ff35bbb1753436bc0..56d9328b41ea4828bb9750c746365600218ef288 100644 (file)
@@ -24,6 +24,7 @@ obj-y += xenmem.o
 obj-y += xenmisc.o
 obj-y += xensetup.o
 obj-y += xentime.o
+obj-y += flushd.o
 
 obj-$(crash_debug) += gdbstub.o
 
index 07a8affaefaaa17678075fe78a1fb8d499a8943e..7d86f00bf8b3670a0bbfda36c80511768b3bb124 100644 (file)
@@ -176,7 +176,9 @@ sal_emulator (long index, unsigned long in1, unsigned long in2,
                printf("*** CALLED SAL_MC_SET_PARAMS.  IGNORED...\n");
                break;
            case SAL_CACHE_FLUSH:
-               printf("*** CALLED SAL_CACHE_FLUSH.  IGNORED...\n");
+               /*  The best we can do is to flush with fc all the domain.  */
+               domain_cache_flush (current->domain, in1 == 4 ? 1 : 0);
+               status = 0;
                break;
            case SAL_CACHE_INIT:
                printf("*** CALLED SAL_CACHE_INIT.  IGNORED...\n");
index c4926421f3a966d4c2b8a8f7554190440af343b6..d3c32ffa28bf9b3cb29a5b677e9898177efeff0a 100644 (file)
@@ -674,6 +674,61 @@ tryagain:
        return 0;
 }
 
+/* Flush cache of domain d.  */
+void domain_cache_flush (struct domain *d, int sync_only)
+{
+       struct mm_struct *mm = d->arch.mm;
+       pgd_t *pgd = mm->pgd;
+       unsigned long maddr;
+       int i,j,k, l;
+       int nbr_page = 0;
+       void (*flush_func)(unsigned long start, unsigned long end);
+       extern void flush_dcache_range (unsigned long, unsigned long);
+
+       if (sync_only)
+               flush_func = &flush_icache_range;
+       else
+               flush_func = &flush_dcache_range;
+
+#ifdef CONFIG_DOMAIN0_CONTIGUOUS
+       if (d == dom0) {
+               /* This is not fully correct (because of hole), but it should
+                  be enough for now.  */
+               (*flush_func)(__va_ul (dom0_start),
+                             __va_ul (dom0_start + dom0_size));
+               return;
+       }
+#endif
+       for (i = 0; i < PTRS_PER_PGD; pgd++, i++) {
+               pud_t *pud;
+               if (!pgd_present(*pgd))
+                       continue;
+               pud = pud_offset(pgd, 0);
+               for (j = 0; j < PTRS_PER_PUD; pud++, j++) {
+                       pmd_t *pmd;
+                       if (!pud_present(*pud))
+                               continue;
+                       pmd = pmd_offset(pud, 0);
+                       for (k = 0; k < PTRS_PER_PMD; pmd++, k++) {
+                               pte_t *pte;
+                               if (!pmd_present(*pmd))
+                                       continue;
+                               pte = pte_offset_map(pmd, 0);
+                               for (l = 0; l < PTRS_PER_PTE; pte++, l++) {
+                                       if (!pte_present(*pte))
+                                               continue;
+                                       /* Convert PTE to maddr.  */
+                                       maddr = __va_ul (pte_val(*pte)
+                                                        & _PAGE_PPN_MASK);
+                                       (*flush_func)(maddr, maddr+ PAGE_SIZE);
+                                       nbr_page++;
+                               }
+                       }
+               }
+       }
+       //printf ("domain_cache_flush: %d %d pages\n", d->domain_id, nbr_page);
+}
+
 // FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE
 #if 1
 unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr)
diff --git a/xen/arch/ia64/xen/flushd.S b/xen/arch/ia64/xen/flushd.S
new file mode 100644 (file)
index 0000000..1a83e79
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Cache flushing routines.
+ *
+ * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
+ *
+ * 05/28/05 Zoltan Menyhart    Dynamic stride size
+ * 03/31/06 Tristan Gingold     copied and modified for dcache.
+ */
+
+#include <asm/asmmacro.h>
+
+
+       /*
+        * flush_dcache_range(start,end)
+        *
+        *      Flush cache.
+        *
+        *      Must deal with range from start to end-1 but nothing else (need to
+        *      be careful not to touch addresses that may be unmapped).
+        *
+        *      Note: "in0" and "in1" are preserved for debugging purposes.
+        */
+GLOBAL_ENTRY(flush_dcache_range)
+
+       .prologue
+       alloc   r2=ar.pfs,2,0,0,0
+       movl    r3=ia64_d_cache_stride_shift
+       mov     r21=1
+       ;;
+       ld8     r20=[r3]                // r20: stride shift
+       sub     r22=in1,r0,1            // last byte address
+       ;;
+       shr.u   r23=in0,r20             // start / (stride size)
+       shr.u   r22=r22,r20             // (last byte address) / (stride size)
+       shl     r21=r21,r20             // r21: stride size of the i-cache(s)
+       ;;
+       sub     r8=r22,r23              // number of strides - 1
+       shl     r24=r23,r20             // r24: addresses for "fc" =
+                                       //      "start" rounded down to stride boundary
+       .save   ar.lc,r3
+       mov     r3=ar.lc                // save ar.lc
+       ;;
+
+       .body
+       mov     ar.lc=r8
+       ;;
+       /*
+        * 32 byte aligned loop, even number of (actually 2) bundles
+        */
+.Loop: fc      r24                     // issuable on M0 only
+       add     r24=r21,r24             // we flush "stride size" bytes per iteration
+       nop.i   0
+       br.cloop.sptk.few .Loop
+       ;;
+       sync.i
+       ;;
+       srlz.i
+       ;;
+       mov     ar.lc=r3                // restore ar.lc
+       br.ret.sptk.many rp
+END(flush_dcache_range)
index ce834eed95279819e29e35af61aa242d20f0e54f..c255583f0ca6db56a7adc278ea08fe0896c502eb 100644 (file)
 
 extern void domain_relinquish_resources(struct domain *);
 
+/* Flush cache of domain d.
+   If sync_only is true, only synchronize I&D caches,
+   if false, flush and invalidate caches.  */
+extern void domain_cache_flush (struct domain *d, int sync_only);
+
 struct arch_domain {
     struct mm_struct *mm;
     unsigned long metaphysical_rr0;
index 85216541d49696625d9b6cde514d4d0b3ebff8db..17658b9f823d0aeebe42fd7525e40d3653be051d 100644 (file)
@@ -127,6 +127,7 @@ extern void vhpt_multiple_insert(unsigned long vaddr, unsigned long pte,
 extern void vhpt_insert (unsigned long vadr, unsigned long ptr,
                         unsigned logps);
 extern void vhpt_flush(void);
+extern void smp_vhpt_flush_all(void);
 
 /* Currently the VHPT is allocated per CPU.  */
 DECLARE_PER_CPU (unsigned long, vhpt_paddr);
index 1a6f8e693f92defda9fd6929cca93f7f32b98edd..1f7f1bf7c12fc6e02fdbcdf58034b291eee836e3 100644 (file)
@@ -55,6 +55,9 @@ static inline int get_order_from_pages(unsigned long nr_pages)
 #define __pa(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
 #define __va(x)                ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
 
+/* It is sometimes very useful to have unsigned long as result.  */
+#define __va_ul(x)     ({xen_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.l;})
+
 #undef PAGE_OFFSET
 #define PAGE_OFFSET    __IA64_UL_CONST(0xf000000000000000)